return -EINVAL;
if ( !may_switch_mode(d) )
return -EACCES;
- if ( !IS_COMPAT(d) )
+ if ( !is_pv_32on64_domain(d) )
return 0;
- d->is_compat = 0;
+ d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
release_arg_xlat_area(d);
/* switch gdt */
return -ENOSYS;
if ( !may_switch_mode(d) )
return -EACCES;
- if ( IS_COMPAT(d) )
+ if ( is_pv_32on64_domain(d) )
return 0;
- d->is_compat = 1;
+ d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
/* switch gdt */
gdt_l1e = l1e_from_page(virt_to_page(compat_gdt_table), PAGE_HYPERVISOR);
v->arch.perdomain_ptes =
d->arch.mm_perdomain_pt + (v->vcpu_id << GDT_LDT_VCPU_SHIFT);
- return (pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0);
+ return (is_pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0);
}
void vcpu_destroy(struct vcpu *v)
{
- if ( pv_32on64_vcpu(v) )
+ if ( is_pv_32on64_vcpu(v) )
release_compat_l4(v);
}
virt_to_page(d->shared_info), d, XENSHARE_writable);
}
- return is_hvm_domain(d) ? hvm_domain_initialise(d) : 0;
+ if ( is_hvm_domain(d) )
+ {
+ if ( (rc = hvm_domain_initialise(d)) != 0 )
+ goto fail;
+ }
+ else
+ {
+ /* 32-bit PV guest by default only if Xen is not 64-bit. */
+ d->arch.is_32bit_pv = d->arch.has_32bit_shinfo =
+ (CONFIG_PAGING_LEVELS != 4);
+ }
+
+
+ return 0;
fail:
free_xenheap_page(d->shared_info);
free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
#endif
- if ( pv_32on64_domain(d) )
+ if ( is_pv_32on64_domain(d) )
release_arg_xlat_area(d);
free_xenheap_page(d->shared_info);
/* The context is a compat-mode one if the target domain is compat-mode;
* we expect the tools to DTRT even in compat-mode callers. */
- compat = pv_32on64_domain(d);
+ compat = is_pv_32on64_domain(d);
#ifdef CONFIG_COMPAT
#define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
all_segs_okay &= loadsegment(gs, nctxt->user_regs.gs);
}
- if ( !IS_COMPAT(n->domain) )
+ if ( !is_pv_32on64_domain(n->domain) )
{
/* This can only be non-zero if selector is NULL. */
if ( nctxt->fs_base )
(unsigned long *)nctxt->kernel_sp;
unsigned long cs_and_mask, rflags;
- if ( IS_COMPAT(n->domain) )
+ if ( is_pv_32on64_domain(n->domain) )
{
unsigned int *esp = ring_1(regs) ?
(unsigned int *)regs->rsp :
if ( regs->es )
dirty_segment_mask |= DIRTY_ES;
- if ( regs->fs || IS_COMPAT(v->domain) )
+ if ( regs->fs || is_pv_32on64_domain(v->domain) )
{
dirty_segment_mask |= DIRTY_FS;
ctxt->fs_base = 0; /* != 0 selector kills fs_base */
dirty_segment_mask |= DIRTY_FS_BASE;
}
- if ( regs->gs || IS_COMPAT(v->domain) )
+ if ( regs->gs || is_pv_32on64_domain(v->domain) )
{
dirty_segment_mask |= DIRTY_GS;
ctxt->gs_base_user = 0; /* != 0 selector kills gs_base_user */
__context_switch();
#ifdef CONFIG_COMPAT
- if ( is_idle_vcpu(prev)
- || IS_COMPAT(prev->domain) != IS_COMPAT(next->domain) )
+ if ( is_idle_vcpu(prev) ||
+ (is_pv_32on64_domain(prev->domain) !=
+ is_pv_32on64_domain(next->domain)) )
{
uint32_t efer_lo, efer_hi;
- local_flush_tlb_one(GDT_VIRT_START(next) + FIRST_RESERVED_GDT_BYTE);
+ local_flush_tlb_one(GDT_VIRT_START(next) +
+ FIRST_RESERVED_GDT_BYTE);
rdmsr(MSR_EFER, efer_lo, efer_hi);
- if ( !IS_COMPAT(next->domain) == !(efer_lo & EFER_SCE) )
+ if ( !is_pv_32on64_domain(next->domain) == !(efer_lo & EFER_SCE) )
{
efer_lo ^= EFER_SCE;
wrmsr(MSR_EFER, efer_lo, efer_hi);
/* Update per-VCPU guest runstate shared memory area (if registered). */
if ( !guest_handle_is_null(runstate_guest(next)) )
{
- if ( !IS_COMPAT(next->domain) )
+ if ( !is_pv_32on64_domain(next->domain) )
__copy_to_guest(runstate_guest(next), &next->runstate, 1);
#ifdef CONFIG_COMPAT
else
for ( i = 0; *p != '\0'; i++ )
mcs->call.args[i] = next_arg(p, args);
- if ( IS_COMPAT(current->domain) )
+ if ( is_pv_32on64_domain(current->domain) )
{
for ( ; i < 6; i++ )
mcs->call.args[i] = 0;
regs->eip -= 2; /* re-execute 'syscall' / 'int 0x82' */
#ifdef __x86_64__
- if ( !IS_COMPAT(current->domain) )
+ if ( !is_pv_32on64_domain(current->domain) )
{
for ( i = 0; *p != '\0'; i++ )
{
unsigned long pfn;
#ifdef __x86_64__
- if ( pv_32on64_vcpu(v) )
+ if ( is_pv_32on64_vcpu(v) )
{
pfn = l4e_get_pfn(*(l4_pgentry_t *)
__va(pagetable_get_paddr(v->arch.guest_table)));
}
#ifdef CONFIG_COMPAT
- if (compat32)
+ if ( compat32 )
{
l1_pgentry_t gdt_l1e;
- d->is_compat = 1;
+ d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
v->vcpu_info = (void *)&d->shared_info->compat.vcpu_info[0];
if ( nr_pages != (unsigned int)nr_pages )
#if CONFIG_PAGING_LEVELS < 4
unsigned long mask = (1UL << L2_PAGETABLE_SHIFT) - 1;
#else
- unsigned long mask = !IS_COMPAT(d)
- ? (1UL << L4_PAGETABLE_SHIFT) - 1
- : (1UL << L2_PAGETABLE_SHIFT) - 1;
+ unsigned long mask = is_pv_32bit_domain(d)
+ ? (1UL << L2_PAGETABLE_SHIFT) - 1
+ : (1UL << L4_PAGETABLE_SHIFT) - 1;
#endif
value = (parms.virt_hv_start_low + mask) & ~mask;
#ifdef CONFIG_COMPAT
HYPERVISOR_COMPAT_VIRT_START(d) =
max_t(unsigned int, m2p_compat_vstart, value);
- d->arch.physaddr_bitsize = !IS_COMPAT(d) ? 64 :
+ d->arch.physaddr_bitsize = !is_pv_32on64_domain(d) ? 64 :
fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
+ (PAGE_SIZE - 2);
- if ( value > (!IS_COMPAT(d) ?
+ if ( value > (!is_pv_32on64_domain(d) ?
HYPERVISOR_VIRT_START :
__HYPERVISOR_COMPAT_VIRT_START) )
#else
vinitrd_start = round_pgup(vkern_end);
vinitrd_end = vinitrd_start + initrd_len;
vphysmap_start = round_pgup(vinitrd_end);
- vphysmap_end = vphysmap_start + (nr_pages * (!IS_COMPAT(d) ?
+ vphysmap_end = vphysmap_start + (nr_pages * (!is_pv_32on64_domain(d) ?
sizeof(unsigned long) :
sizeof(unsigned int)));
vstartinfo_start = round_pgup(vphysmap_end);
((_l) & ~((1UL<<(_s))-1))) >> (_s))
if ( (1 + /* # L4 */
NR(v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
- (!IS_COMPAT(d) ?
+ (!is_pv_32on64_domain(d) ?
NR(v_start, v_end, L3_PAGETABLE_SHIFT) : /* # L2 */
4) + /* # compat L2 */
NR(v_start, v_end, L2_PAGETABLE_SHIFT)) /* # L1 */
#elif defined(__x86_64__)
/* Overlap with Xen protected area? */
- if ( !IS_COMPAT(d) ?
+ if ( !is_pv_32on64_domain(d) ?
((v_start < HYPERVISOR_VIRT_END) &&
(v_end > HYPERVISOR_VIRT_START)) :
(v_end > HYPERVISOR_COMPAT_VIRT_START(d)) )
return -EINVAL;
}
- if ( IS_COMPAT(d) )
+ if ( is_pv_32on64_domain(d) )
{
v->arch.guest_context.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;
v->arch.guest_context.event_callback_cs = FLAT_COMPAT_KERNEL_CS;
}
/* WARNING: The new domain must have its 'processor' field filled in! */
- if ( !IS_COMPAT(d) )
+ if ( !is_pv_32on64_domain(d) )
{
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
- if ( IS_COMPAT(d) )
+ if ( is_pv_32on64_domain(d) )
{
v->arch.guest_table_user = v->arch.guest_table;
if ( setup_arg_xlat_area(v, l4start) < 0 )
*l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
l2tab++;
}
- *l1tab = l1e_from_pfn(mfn, !IS_COMPAT(d) ? L1_PROT : COMPAT_L1_PROT);
+ *l1tab = l1e_from_pfn(mfn, (!is_pv_32on64_domain(d) ?
+ L1_PROT : COMPAT_L1_PROT));
l1tab++;
page = mfn_to_page(mfn);
}
#ifdef CONFIG_COMPAT
- if ( IS_COMPAT(d) )
+ if ( is_pv_32on64_domain(d) )
{
/* Ensure the first four L3 entries are all populated. */
for ( i = 0, l3tab = l3start; i < 4; ++i, ++l3tab )
/* Top-level p.t. is pinned. */
if ( (page->u.inuse.type_info & PGT_type_mask) ==
- (!IS_COMPAT(d) ? PGT_l4_page_table : PGT_l3_page_table) )
+ (!is_pv_32on64_domain(d) ?
+ PGT_l4_page_table : PGT_l3_page_table) )
{
page->count_info += 1;
page->u.inuse.type_info += 1 | PGT_pinned;
si->shared_info = virt_to_maddr(d->shared_info);
si->flags = SIF_PRIVILEGED | SIF_INITDOMAIN;
- si->pt_base = vpt_start + 2 * PAGE_SIZE * !!IS_COMPAT(d);
+ si->pt_base = vpt_start + 2 * PAGE_SIZE * !!is_pv_32on64_domain(d);
si->nr_pt_frames = nr_pt_pages;
si->mfn_list = vphysmap_start;
snprintf(si->magic, sizeof(si->magic), "xen-%i.%i-x86_%d%s",
if ( pfn > REVERSE_START )
mfn = alloc_epfn - (pfn - REVERSE_START);
#endif
- if ( !IS_COMPAT(d) )
+ if ( !is_pv_32on64_domain(d) )
((unsigned long *)vphysmap_start)[pfn] = mfn;
else
((unsigned int *)vphysmap_start)[pfn] = mfn;
#ifndef NDEBUG
#define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
#endif
- if ( !IS_COMPAT(d) )
+ if ( !is_pv_32on64_domain(d) )
((unsigned long *)vphysmap_start)[pfn] = mfn;
else
((unsigned int *)vphysmap_start)[pfn] = mfn;
}
#ifdef CONFIG_COMPAT
- if ( IS_COMPAT(d) )
+ if ( is_pv_32on64_domain(d) )
xlat_start_info(si, XLAT_start_info_console_dom0);
#endif
* [EAX,EBX,ECX,EDX,EDI,EBP are zero]
*/
regs = &v->arch.guest_context.user_regs;
- regs->ds = regs->es = regs->fs = regs->gs = !IS_COMPAT(d)
- ? FLAT_KERNEL_DS
- : FLAT_COMPAT_KERNEL_DS;
- regs->ss = !IS_COMPAT(d) ? FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS;
- regs->cs = !IS_COMPAT(d) ? FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS;
+ regs->ds = regs->es = regs->fs = regs->gs =
+ !is_pv_32on64_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS;
+ regs->ss = (!is_pv_32on64_domain(d) ?
+ FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS);
+ regs->cs = (!is_pv_32on64_domain(d) ?
+ FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS);
regs->eip = parms.virt_entry;
regs->esp = vstack_end;
regs->esi = vstartinfo_start;
void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
{
#ifdef CONFIG_COMPAT
-#define c(fld) (!IS_COMPAT(v->domain) ? (c.nat->fld) : (c.cmp->fld))
+#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
#else
#define c(fld) (c.nat->fld)
#endif
- if ( !IS_COMPAT(v->domain) )
+ if ( !is_pv_32on64_domain(v->domain) )
memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
else
if ( is_hvm_vcpu(v) )
{
- if ( !IS_COMPAT(v->domain) )
+ if ( !is_pv_32on64_domain(v->domain) )
hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg);
#ifdef CONFIG_COMPAT
else
BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
c(user_regs.eflags |= v->arch.iopl << 12);
- if ( !IS_COMPAT(v->domain) )
+ if ( !is_pv_32on64_domain(v->domain) )
{
c.nat->ctrlreg[3] = xen_pfn_to_cr3(
pagetable_get_pfn(v->arch.guest_table));
break;
case HVM_PARAM_CALLBACK_IRQ:
hvm_set_callback_via(d, a.value);
-#if defined(__x86_64__)
/*
* Since this operation is one of the very first executed
* by PV drivers on initialisation or after save/restore, it
* is a sensible point at which to sample the execution mode of
* the guest and latch 32- or 64-bit format for shared state.
*/
- d->is_compat = (hvm_guest_x86_mode(current) == 4);
-#endif
+ d->arch.has_32bit_shinfo = (hvm_guest_x86_mode(current) != 8);
break;
}
d->arch.hvm_domain.params[a.index] = a.value;
else
{
/* Odd pages: va for previous ma. */
- if ( IS_COMPAT(dom0) )
+ if ( is_pv_32on64_domain(dom0) )
{
-
/*
     * The compatibility bounce code sets up a page table
* with a 1-1 mapping of the first 1G of memory so
void machine_kexec(xen_kexec_image_t *image)
{
#ifdef CONFIG_COMPAT
- if ( IS_COMPAT(dom0) )
+ if ( is_pv_32on64_domain(dom0) )
{
extern void compat_machine_kexec(unsigned long rnk,
unsigned long indirection_page,
#ifdef CONFIG_COMPAT
l2_pgentry_t *compat_idle_pg_table_l2 = NULL;
-#define l3_disallow_mask(d) (!IS_COMPAT(d) ? \
- L3_DISALLOW_MASK : \
+#define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ? \
+ L3_DISALLOW_MASK : \
COMPAT_L3_DISALLOW_MASK)
#else
#define l3_disallow_mask(d) L3_DISALLOW_MASK
#define adjust_guest_l1e(pl1e, d) \
do { \
if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) && \
- likely(!IS_COMPAT(d)) ) \
+ likely(!is_pv_32on64_domain(d)) ) \
{ \
/* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */ \
if ( (l1e_get_flags((pl1e)) & (_PAGE_GUEST_KERNEL|_PAGE_GLOBAL)) \
#define adjust_guest_l1e(pl1e, d) \
do { \
if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) && \
- likely(!IS_COMPAT(d)) ) \
+ likely(!is_pv_32on64_domain(d)) ) \
l1e_add_flags((pl1e), _PAGE_USER); \
} while ( 0 )
#endif
#define adjust_guest_l2e(pl2e, d) \
do { \
if ( likely(l2e_get_flags((pl2e)) & _PAGE_PRESENT) && \
- likely(!IS_COMPAT(d)) ) \
+ likely(!is_pv_32on64_domain(d)) ) \
l2e_add_flags((pl2e), _PAGE_USER); \
} while ( 0 )
-#define adjust_guest_l3e(pl3e, d) \
- do { \
- if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) ) \
- l3e_add_flags((pl3e), likely(!IS_COMPAT(d)) ? \
- _PAGE_USER : \
- _PAGE_USER|_PAGE_RW); \
+#define adjust_guest_l3e(pl3e, d) \
+ do { \
+ if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) ) \
+ l3e_add_flags((pl3e), likely(!is_pv_32on64_domain(d)) ? \
+ _PAGE_USER : \
+ _PAGE_USER|_PAGE_RW); \
} while ( 0 )
#define adjust_guest_l4e(pl4e, d) \
do { \
if ( likely(l4e_get_flags((pl4e)) & _PAGE_PRESENT) && \
- likely(!IS_COMPAT(d)) ) \
+ likely(!is_pv_32on64_domain(d)) ) \
l4e_add_flags((pl4e), _PAGE_USER); \
} while ( 0 )
#endif
#ifdef CONFIG_COMPAT
-#define unadjust_guest_l3e(pl3e, d) \
- do { \
- if ( unlikely(IS_COMPAT(d)) && \
- likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) ) \
- l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED); \
+#define unadjust_guest_l3e(pl3e, d) \
+ do { \
+ if ( unlikely(is_pv_32on64_domain(d)) && \
+ likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) ) \
+ l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED); \
} while ( 0 )
#else
#define unadjust_guest_l3e(_p, _d) ((void)(_d))
#ifndef CONFIG_COMPAT
l2_pgentry_t l2e;
int i;
-#else
+#endif
- if ( !IS_COMPAT(d) )
+ if ( !is_pv_32bit_domain(d) )
return 1;
-#endif
pl3e = (l3_pgentry_t *)((unsigned long)pl3e & PAGE_MASK);
* 512 entries must be valid/verified, which is most easily achieved
* by clearing them out.
*/
- if ( IS_COMPAT(d) )
+ if ( is_pv_32on64_domain(d) )
memset(pl3e + 4, 0, (L3_PAGETABLE_ENTRIES - 4) * sizeof(*pl3e));
for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
{
#if defined(CONFIG_X86_PAE) || defined(CONFIG_COMPAT)
- if ( (CONFIG_PAGING_LEVELS < 4 || IS_COMPAT(d)) && i == 3 )
+ if ( is_pv_32bit_domain(d) && (i == 3) )
{
if ( !(l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) ||
(l3e_get_flags(pl3e[i]) & l3_disallow_mask(d)) ||
pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
l4e_from_page(virt_to_page(d->arch.mm_perdomain_l3),
__PAGE_HYPERVISOR);
- if ( IS_COMPAT(d) )
+ if ( is_pv_32on64_domain(d) )
pl4e[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
l4e_from_page(virt_to_page(d->arch.mm_arg_xlat_l3),
__PAGE_HYPERVISOR);
* Disallow updates to final L3 slot. It contains Xen mappings, and it
* would be a pain to ensure they remain continuously valid throughout.
*/
- if ( (CONFIG_PAGING_LEVELS < 4 || IS_COMPAT(d)) &&
- pgentry_ptr_to_slot(pl3e) >= 3 )
+ if ( is_pv_32bit_domain(d) && (pgentry_ptr_to_slot(pl3e) >= 3) )
return 0;
#endif
unsigned long old_base_mfn;
#ifdef CONFIG_COMPAT
- if ( IS_COMPAT(d) )
+ if ( is_pv_32on64_domain(d) )
{
okay = paging_mode_refcounts(d)
? 0 /* Old code was broken, but what should it be? */
goto pin_page;
case MMUEXT_PIN_L4_TABLE:
- if ( IS_COMPAT(FOREIGNDOM) )
+ if ( is_pv_32bit_domain(FOREIGNDOM) )
break;
type = PGT_l4_page_table;
flush_tlb_mask(d->domain_dirty_cpumask);
break;
default:
- if ( unlikely(!IS_COMPAT(d) ?
+ if ( unlikely(!is_pv_32on64_domain(d) ?
get_user(vmask, (unsigned long *)bmap_ptr) :
get_user(vmask, (unsigned int *)bmap_ptr)) )
rc = -EFAULT;
flush_tlb_one_mask(d->domain_dirty_cpumask, va);
break;
default:
- if ( unlikely(!IS_COMPAT(d) ?
+ if ( unlikely(!is_pv_32on64_domain(d) ?
get_user(vmask, (unsigned long *)bmap_ptr) :
get_user(vmask, (unsigned int *)bmap_ptr)) )
rc = -EFAULT;
nl1e = l1e_from_intpte(val);
if ( unlikely(!get_page_from_l1e(gl1e_to_ml1e(d, nl1e), d)) )
{
- if ( (CONFIG_PAGING_LEVELS == 3 || IS_COMPAT(d)) &&
+ if ( (CONFIG_PAGING_LEVELS >= 3) && is_pv_32bit_domain(d) &&
(bytes == 4) && (addr & 4) && !do_cmpxchg &&
(l1e_get_flags(nl1e) & _PAGE_PRESENT) )
{
ptwr_ctxt.ctxt.regs = regs;
ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
- IS_COMPAT(d) ? 32 : BITS_PER_LONG;
+ is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
ptwr_ctxt.cr2 = addr;
ptwr_ctxt.pte = pte;
t == SH_type_fl1_pae_shadow ||
t == SH_type_fl1_64_shadow ||
t == SH_type_monitor_table ||
- (pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
+ (is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
(page_get_owner(mfn_to_page(_mfn(sp->backpointer)))
== v->domain));
SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4, 4)(v, smfn);
break;
case SH_type_l2h_64_shadow:
- ASSERT(pv_32on64_vcpu(v));
+ ASSERT(is_pv_32on64_vcpu(v));
/* Fall through... */
case SH_type_l2_64_shadow:
SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4, 4)(v, smfn);
/* 32bit PV guests on 64bit xen behave like older 64bit linux: they
* change an l4e instead of cr3 to switch tables. Give them the
* same optimization */
- if ( pv_32on64_domain(d) )
+ if ( is_pv_32on64_domain(d) )
d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
#endif
shadow_type, mfn_x(smfn));
/* 32-on-64 PV guests don't own their l4 pages so can't get_page them */
- if ( !pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
+ if ( !is_pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
{
res = get_page(mfn_to_page(gmfn), d);
ASSERT(res == 1);
mfn_x(gmfn), shadow_type, mfn_x(smfn));
shadow_hash_delete(v, mfn_x(gmfn), shadow_type, smfn);
/* 32-on-64 PV guests don't own their l4 pages; see set_shadow_status */
- if ( !pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
+ if ( !is_pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
put_page(mfn_to_page(gmfn));
}
// PV guests in 64-bit mode use two different page tables for user vs
// supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
// It is always shadowed as present...
- if ( (GUEST_PAGING_LEVELS == 4) && !pv_32on64_domain(d)
+ if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d)
&& !is_hvm_domain(d) )
{
sflags |= _PAGE_USER;
for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ ) \
{ \
if ( (!(_xen)) \
- || !pv_32on64_domain(_dom) \
+ || !is_pv_32on64_domain(_dom) \
|| mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_64_shadow \
|| (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) ) \
{ \
__PAGE_HYPERVISOR);
}
- if ( pv_32on64_domain(v->domain) )
+ if ( is_pv_32on64_domain(v->domain) )
{
/* install compat arg xlat entry */
sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
int i;
#else
- if ( !pv_32on64_vcpu(v) )
+ if ( !is_pv_32on64_vcpu(v) )
return;
#endif
l4e = sh_map_domain_page(m4mfn);
l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
sh_unmap_domain_page(l4e);
- if ( pv_32on64_vcpu(v) )
+ if ( is_pv_32on64_vcpu(v) )
{
// Install a monitor l2 table in slot 3 of the l3 table.
// This is used for all Xen entries.
unsigned int t = SH_type_l2_shadow;
/* Tag compat L2 containing hypervisor (m2p) mappings */
- if ( pv_32on64_domain(v->domain) &&
+ if ( is_pv_32on64_domain(v->domain) &&
guest_l4_table_offset(gw->va) == 0 &&
guest_l3_table_offset(gw->va) == 3 )
t = SH_type_l2h_shadow;
l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
m3mfn = _mfn(l4e_get_pfn(l4e[0]));
- if ( pv_32on64_vcpu(v) )
+ if ( is_pv_32on64_vcpu(v) )
{
/* Need to destroy the l2 monitor page in slot 3 too */
l3_pgentry_t *l3e = sh_map_domain_page(m3mfn);
(unsigned long)pagetable_get_pfn(v->arch.guest_table));
#if GUEST_PAGING_LEVELS == 4
- if ( !(v->arch.flags & TF_kernel_mode) && !pv_32on64_vcpu(v) )
+ if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_vcpu(v) )
gmfn = pagetable_get_mfn(v->arch.guest_table_user);
else
#endif
mfn = shadow_l3e_get_mfn(*sl3e);
gmfn = get_shadow_status(v, audit_gfn_to_mfn(v, gfn, gl3mfn),
((GUEST_PAGING_LEVELS == 3 ||
- pv_32on64_vcpu(v))
+ is_pv_32on64_vcpu(v))
&& !shadow_mode_external(v->domain)
&& (guest_index(gl3e) % 4) == 3)
? SH_type_l2h_shadow
if ( is_hvm_vcpu(current) )
return;
- if ( IS_COMPAT(container_of(regs, struct cpu_info, guest_cpu_user_regs)->current_vcpu->domain) )
+ if ( is_pv_32on64_vcpu(current) )
{
compat_show_guest_stack(regs, debug_stack_lines);
return;
break;
case 3: /* Read CR3 */
- if ( !IS_COMPAT(v->domain) )
+ if ( !is_pv_32on64_vcpu(v) )
*reg = xen_pfn_to_cr3(mfn_to_gmfn(
v->domain, pagetable_get_pfn(v->arch.guest_table)));
#ifdef CONFIG_COMPAT
case 3: /* Write CR3 */
LOCK_BIGLOCK(v->domain);
- if ( !IS_COMPAT(v->domain) )
+ if ( !is_pv_32on64_vcpu(v) )
rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
#ifdef CONFIG_COMPAT
else
{
#ifdef CONFIG_X86_64
case MSR_FS_BASE:
- if ( IS_COMPAT(v->domain) )
+ if ( is_pv_32on64_vcpu(v) )
goto fail;
if ( wrmsr_safe(MSR_FS_BASE, regs->eax, regs->edx) )
goto fail;
((u64)regs->edx << 32) | regs->eax;
break;
case MSR_GS_BASE:
- if ( IS_COMPAT(v->domain) )
+ if ( is_pv_32on64_vcpu(v) )
goto fail;
if ( wrmsr_safe(MSR_GS_BASE, regs->eax, regs->edx) )
goto fail;
((u64)regs->edx << 32) | regs->eax;
break;
case MSR_SHADOW_GS_BASE:
- if ( IS_COMPAT(v->domain) )
+ if ( is_pv_32on64_vcpu(v) )
goto fail;
if ( wrmsr_safe(MSR_SHADOW_GS_BASE, regs->eax, regs->edx) )
goto fail;
{
#ifdef CONFIG_X86_64
case MSR_FS_BASE:
- if ( IS_COMPAT(v->domain) )
+ if ( is_pv_32on64_vcpu(v) )
goto fail;
regs->eax = v->arch.guest_context.fs_base & 0xFFFFFFFFUL;
regs->edx = v->arch.guest_context.fs_base >> 32;
break;
case MSR_GS_BASE:
- if ( IS_COMPAT(v->domain) )
+ if ( is_pv_32on64_vcpu(v) )
goto fail;
regs->eax = v->arch.guest_context.gs_base_kernel & 0xFFFFFFFFUL;
regs->edx = v->arch.guest_context.gs_base_kernel >> 32;
break;
case MSR_SHADOW_GS_BASE:
- if ( IS_COMPAT(v->domain) )
+ if ( is_pv_32on64_vcpu(v) )
goto fail;
regs->eax = v->arch.guest_context.gs_base_user & 0xFFFFFFFFUL;
regs->edx = v->arch.guest_context.gs_base_user >> 32;
OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
BLANK();
- OFFSET(DOMAIN_is_compat, struct domain, is_compat);
+ OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
BLANK();
OFFSET(VMCB_rax, struct vmcb_struct, rax);
jz int80_slow_path
movq VCPU_domain(%rbx),%rax
- testb $1,DOMAIN_is_compat(%rax)
+ testb $1,DOMAIN_is_32bit_pv(%rax)
jnz compat_int80_direct_trap
call create_bounce_frame
# create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
movq CPUINFO_current_vcpu(%rax),%rax
movq VCPU_domain(%rax),%rax
- testb $1,DOMAIN_is_compat(%rax)
+ testb $1,DOMAIN_is_32bit_pv(%rax)
setz %al
leal (%rax,%rax,2),%eax
orb %al,UREGS_cs(%rsp)
testb $3,UREGS_cs(%rsp)
jz restore_all_xen
movq VCPU_domain(%rbx),%rax
- testb $1,DOMAIN_is_compat(%rax)
+ testb $1,DOMAIN_is_32bit_pv(%rax)
jz test_all_events
jmp compat_test_all_events
jz restore_all_xen
leaq VCPU_trap_bounce(%rbx),%rdx
movq VCPU_domain(%rbx),%rax
- testb $1,DOMAIN_is_compat(%rax)
+ testb $1,DOMAIN_is_32bit_pv(%rax)
jnz compat_post_handle_exception
testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
jz test_all_events
/* All code and data segments are okay. No base/limit checking. */
if ( (b & _SEGMENT_S) )
{
- if ( !IS_COMPAT(dom) || !(b & _SEGMENT_L) )
- goto good;
- goto bad;
+ if ( is_pv_32bit_domain(dom) && (b & _SEGMENT_L) )
+ goto bad;
+ goto good;
}
/* Invalid type 0 is harmless. It is used for 2nd half of a call gate. */
void toggle_guest_mode(struct vcpu *v)
{
- if ( IS_COMPAT(v->domain) )
+ if ( is_pv_32bit_vcpu(v) )
return;
v->arch.flags ^= TF_kernel_mode;
__asm__ __volatile__ ( "swapgs" );
{
if ( is_hvm_domain(d) )
hvm_hypercall_page_initialise(d, hypercall_page);
- else if ( !IS_COMPAT(d) )
+ else if ( !is_pv_32bit_domain(d) )
hypercall_page_initialise_ring3_kernel(hypercall_page);
else
hypercall_page_initialise_ring1_kernel(hypercall_page);
#define load_TR(n) __asm__ __volatile__ ("ltr %%ax" : : "a" (__TSS(n)<<3) )
#if defined(__x86_64__)
-#define GUEST_KERNEL_RPL(d) (!IS_COMPAT(d) ? 3 : 1)
+#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
#elif defined(__i386__)
#define GUEST_KERNEL_RPL(d) ((void)(d), 1)
#endif
*/
#define guest_gate_selector_okay(d, sel) \
((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */ \
- ((sel) == (!IS_COMPAT(d) ? \
+ ((sel) == (!is_pv_32on64_domain(d) ? \
FLAT_KERNEL_CS : /* Xen default seg? */ \
FLAT_COMPAT_KERNEL_CS)) || \
((sel) & 4)) /* LDT seg? */
#include <asm/hvm/domain.h>
#include <asm/e820.h>
+#define has_32bit_shinfo(d) ((d)->arch.has_32bit_shinfo)
+#define is_pv_32bit_domain(d) ((d)->arch.is_32bit_pv)
+#define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain))
#ifdef __x86_64__
-#define pv_32bit_vcpu(v) (!is_hvm_vcpu(v) && IS_COMPAT((v)->domain))
-#define pv_32bit_domain(d) (!is_hvm_domain(d) && IS_COMPAT(d))
-#define pv_32on64_vcpu(v) (pv_32bit_vcpu(v))
-#define pv_32on64_domain(d) (pv_32bit_domain(d))
+#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
#else
-#define pv_32bit_vcpu(v) (!is_hvm_vcpu(v))
-#define pv_32bit_domain(d) (!is_hvm_domain(d))
-#define pv_32on64_vcpu(v) (0)
-#define pv_32on64_domain(d) (0)
+#define is_pv_32on64_domain(d) (0)
#endif
-
+#define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
+#define IS_COMPAT(d) (is_pv_32on64_domain(d))
struct trap_bounce {
uint32_t error_code;
/* Maximum physical-address bitwidth supported by this guest. */
unsigned int physaddr_bitsize;
+
+ /* Is a 32-bit PV (non-HVM) guest? */
+ bool_t is_32bit_pv;
+ /* Is shared-info page in 32-bit format? */
+ bool_t has_32bit_shinfo;
} __cacheline_aligned;
#ifdef CONFIG_X86_PAE
else
{
cpu = smp_processor_id();
- desc = (!IS_COMPAT(v->domain) ? gdt_table : compat_gdt_table)
+ desc = (!is_pv_32on64_vcpu(v) ? gdt_table : compat_gdt_table)
+ __LDT(cpu) - FIRST_RESERVED_GDT_ENTRY;
_set_tssldt_desc(desc, LDT_VIRT_START(v), ents*8-1, 2);
__asm__ __volatile__ ( "lldt %%ax" : : "a" (__LDT(cpu)<<3) );
#ifdef CONFIG_COMPAT
-#define nmi_reason(d) (!IS_COMPAT(d) ? \
+#define nmi_reason(d) (!has_32bit_shinfo(d) ? \
(void *)&(d)->shared_info->native.arch.nmi_reason : \
(void *)&(d)->shared_info->compat.arch.nmi_reason)
-#define GET_SET_SHARED(type, field) \
-static inline type arch_get_##field(const struct domain *d) \
-{ \
- return !IS_COMPAT(d) ? \
- d->shared_info->native.arch.field : \
- d->shared_info->compat.arch.field; \
-} \
-static inline void arch_set_##field(struct domain *d, \
- type val) \
-{ \
- if ( !IS_COMPAT(d) ) \
- d->shared_info->native.arch.field = val; \
- else \
- d->shared_info->compat.arch.field = val; \
+#define GET_SET_SHARED(type, field) \
+static inline type arch_get_##field(const struct domain *d) \
+{ \
+ return !has_32bit_shinfo(d) ? \
+ d->shared_info->native.arch.field : \
+ d->shared_info->compat.arch.field; \
+} \
+static inline void arch_set_##field(struct domain *d, \
+ type val) \
+{ \
+ if ( !has_32bit_shinfo(d) ) \
+ d->shared_info->native.arch.field = val; \
+ else \
+ d->shared_info->compat.arch.field = val; \
}
-#define GET_SET_VCPU(type, field) \
-static inline type arch_get_##field(const struct vcpu *v) \
-{ \
- return !IS_COMPAT(v->domain) ? \
- v->vcpu_info->native.arch.field : \
- v->vcpu_info->compat.arch.field; \
-} \
-static inline void arch_set_##field(struct vcpu *v, \
- type val) \
-{ \
- if ( !IS_COMPAT(v->domain) ) \
- v->vcpu_info->native.arch.field = val; \
- else \
- v->vcpu_info->compat.arch.field = val; \
+#define GET_SET_VCPU(type, field) \
+static inline type arch_get_##field(const struct vcpu *v) \
+{ \
+ return !has_32bit_shinfo(v->domain) ? \
+ v->vcpu_info->native.arch.field : \
+ v->vcpu_info->compat.arch.field; \
+} \
+static inline void arch_set_##field(struct vcpu *v, \
+ type val) \
+{ \
+ if ( !has_32bit_shinfo(v->domain) ) \
+ v->vcpu_info->native.arch.field = val; \
+ else \
+ v->vcpu_info->compat.arch.field = val; \
}
#else
#define nmi_reason(d) ((void *)&(d)->shared_info->arch.nmi_reason)
-#define GET_SET_SHARED(type, field) \
-static inline type arch_get_##field(const struct domain *d) \
-{ \
- return d->shared_info->arch.field; \
-} \
-static inline void arch_set_##field(struct domain *d, \
- type val) \
-{ \
- d->shared_info->arch.field = val; \
+#define GET_SET_SHARED(type, field) \
+static inline type arch_get_##field(const struct domain *d) \
+{ \
+ return d->shared_info->arch.field; \
+} \
+static inline void arch_set_##field(struct domain *d, \
+ type val) \
+{ \
+ d->shared_info->arch.field = val; \
}
-#define GET_SET_VCPU(type, field) \
-static inline type arch_get_##field(const struct vcpu *v) \
-{ \
- return v->vcpu_info->arch.field; \
-} \
-static inline void arch_set_##field(struct vcpu *v, \
- type val) \
-{ \
- v->vcpu_info->arch.field = val; \
+#define GET_SET_VCPU(type, field) \
+static inline type arch_get_##field(const struct vcpu *v) \
+{ \
+ return v->vcpu_info->arch.field; \
+} \
+static inline void arch_set_##field(struct vcpu *v, \
+ type val) \
+{ \
+ v->vcpu_info->arch.field = val; \
}
#endif
#define is_guest_l1_slot(_s) (1)
#define is_guest_l2_slot(_d, _t, _s) \
- ( !IS_COMPAT(_d) || \
+ ( !is_pv_32bit_domain(_d) || \
!((_t) & PGT_pae_xen_l2) || \
((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
#define is_guest_l3_slot(_s) (1)
#define is_guest_l4_slot(_d, _s) \
- ( IS_COMPAT(_d) \
+ ( is_pv_32bit_domain(_d) \
? ((_s) == 0) \
: (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \
((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))
#define ring_2(r) (((r)->cs & 3) == 2)
#define ring_3(r) (((r)->cs & 3) == 3)
-#define guest_kernel_mode(v, r) \
- (!IS_COMPAT((v)->domain) ? \
- ring_3(r) && ((v)->arch.flags & TF_kernel_mode) : \
- ring_1(r))
+#define guest_kernel_mode(v, r) \
+ (!is_pv_32bit_vcpu(v) ? \
+ (ring_3(r) && ((v)->arch.flags & TF_kernel_mode)) : \
+ (ring_1(r)))
#define permit_softint(dpl, v, r) \
((dpl) >= (guest_kernel_mode(v, r) ? 1 : 3))
/* Check for null trap callback handler: Is the EIP null? */
#define null_trap_bounce(v, tb) \
- (!IS_COMPAT((v)->domain) ? (tb)->eip == 0 : ((tb)->cs & ~3) == 0)
+ (!is_pv_32bit_vcpu(v) ? ((tb)->eip == 0) : (((tb)->cs & ~3) == 0))
/* Number of bytes of on-stack execution state to be context-switched. */
/* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
bool_t is_privileged;
/* Is this guest being debugged by dom0? */
bool_t debugger_attached;
- /* Is a 'compatibility mode' guest (semantics are arch specific)? */
- bool_t is_compat;
/* Are any VCPUs polling event channels (SCHEDOP_poll)? */
bool_t is_polling;
/* Is this guest dying (i.e., a zombie)? */
#define IS_PRIV(_d) ((_d)->is_privileged)
-#ifdef CONFIG_COMPAT
-#define IS_COMPAT(_d) ((_d)->is_compat)
-#else
-#define IS_COMPAT(_d) 0
+#ifndef IS_COMPAT
+#define IS_COMPAT(d) 0
#endif
#define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
struct compat_shared_info compat;
} shared_info_t;
-#define __shared_info(d, s, field) (*(!IS_COMPAT(d) ? \
- &(s)->native.field : \
+#define __shared_info(d, s, field) (*(!has_32bit_shinfo(d) ? \
+ &(s)->native.field : \
&(s)->compat.field))
-#define __shared_info_addr(d, s, field) (!IS_COMPAT(d) ? \
- (void *)&(s)->native.field : \
+#define __shared_info_addr(d, s, field) (!has_32bit_shinfo(d) ? \
+ (void *)&(s)->native.field : \
(void *)&(s)->compat.field)
-#define shared_info(d, field) __shared_info(d, (d)->shared_info, field)
-#define shared_info_addr(d, field) __shared_info_addr(d, (d)->shared_info, field)
+#define shared_info(d, field) \
+ __shared_info(d, (d)->shared_info, field)
+#define shared_info_addr(d, field) \
+ __shared_info_addr(d, (d)->shared_info, field)
typedef union {
struct vcpu_info native;
struct compat_vcpu_info compat;
} vcpu_info_t;
-#define vcpu_info(v, field) (*(!IS_COMPAT((v)->domain) ? \
- &(v)->vcpu_info->native.field : \
+#define vcpu_info(v, field) (*(!has_32bit_shinfo((v)->domain) ? \
+ &(v)->vcpu_info->native.field : \
&(v)->vcpu_info->compat.field))
-#define vcpu_info_addr(v, field) (!IS_COMPAT((v)->domain) ? \
+#define vcpu_info_addr(v, field) (!has_32bit_shinfo((v)->domain) ? \
(void *)&(v)->vcpu_info->native.field : \
(void *)&(v)->vcpu_info->compat.field)